bitkeeper revision 1.825.3.1 (4062f7e9e4Hjc12XFoN-wZ-bm0GL4w)
author: kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Thu, 25 Mar 2004 15:16:57 +0000 (15:16 +0000)
committer: kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Thu, 25 Mar 2004 15:16:57 +0000 (15:16 +0000)
synch_bitops.h:
  new file
system.h, evtchn.h, evtchn.c, entry.S, console.c:
  Fix races in event-channel status checks and updates.

.rootkeys
xen/arch/i386/entry.S
xenolinux-2.4.25-sparse/arch/xen/drivers/console/console.c
xenolinux-2.4.25-sparse/arch/xen/kernel/entry.S
xenolinux-2.4.25-sparse/arch/xen/kernel/evtchn.c
xenolinux-2.4.25-sparse/include/asm-xen/evtchn.h
xenolinux-2.4.25-sparse/include/asm-xen/synch_bitops.h [new file with mode: 0644]
xenolinux-2.4.25-sparse/include/asm-xen/system.h

index e6eaf701f0944779c3ffbb9ad24ab614bae047b1..2caa01cd046d9459c2b5f00bb547852e1e1001e7 100644 (file)
--- a/.rootkeys
+++ b/.rootkeys
 3e5a4e68uJz-xI0IBVMD7xRLQKJDFg xenolinux-2.4.25-sparse/include/asm-xen/segment.h
 3e5a4e68Nfdh6QcOKUTGCaYkf2LmYA xenolinux-2.4.25-sparse/include/asm-xen/smp.h
 3fa8e3f0kBLeE4To2vpdi3cpJbIkbQ xenolinux-2.4.25-sparse/include/asm-xen/suspend.h
+4062f7e2PzFOUGT0PaE7A0VprTU3JQ xenolinux-2.4.25-sparse/include/asm-xen/synch_bitops.h
 3e5a4e68mTr0zcp9SXDbnd-XLrrfxw xenolinux-2.4.25-sparse/include/asm-xen/system.h
 3f1056a9L_kqHcFheV00KbKBzv9j5w xenolinux-2.4.25-sparse/include/asm-xen/vga.h
 3f689063nhrIRsMMZjZxMFk7iEINqQ xenolinux-2.4.25-sparse/include/asm-xen/xen_proc.h
index af7c7fa6f55aeace9c91f9dc9f575567e4035754..8db1f2007455bf13da2e7f0f1db6cef84ecf6823 100644 (file)
@@ -373,7 +373,7 @@ test_all_events:
         andl UPCALL_PENDING(%eax),%ecx  # ECX = pending & ~mask
         andl $1,%ecx                    # Is bit 0 pending and not masked?
         jz   restore_all_guest
-        orl  %ecx,UPCALL_MASK(%eax)     # Upcalls are masked during delivery
+        lock btsl $0,UPCALL_MASK(%eax)  # Upcalls are masked during delivery
 /*process_guest_events:*/
         movzwl PROCESSOR(%ebx),%edx
         shl  $4,%edx                    # sizeof(guest_trap_bounce) == 16
index c55cd0146415fc2abd7f5dcb336e4c375437825c..8b76e8ab4e2625bfe0faf03cbd1b02fa41d54c08 100644 (file)
@@ -144,12 +144,7 @@ void xen_console_init(void)
 
     register_console(&kcons_info);
 
-    /*
-     * XXX This prevents a bogus 'VIRQ_ERROR' when interrupts are enabled
-     * for the first time. This works because by this point all important
-     * VIRQs (eg. timer) have been properly bound.
-     */
-    clear_bit(0, &HYPERVISOR_shared_info->evtchn_pending[0]);
+    evtchn_clear_error_virq();
 }
 
 
index 5f8dcae2fef9a71f3249279e441c5d4371d51fda..22ec1f1b46d5256107ae1eb099bc4d7b26ecc491 100644 (file)
@@ -210,14 +210,14 @@ ENTRY(system_call)
        movl %eax,EAX(%esp)             # save the return value
 ENTRY(ret_from_sys_call)
         movl SYMBOL_NAME(HYPERVISOR_shared_info),%esi
-        btsl $0,evtchn_upcall_mask(%esi) # make tests atomic
+        lock btsl $0,evtchn_upcall_mask(%esi) # make tests atomic
 ret_syscall_tests:
        cmpl $0,need_resched(%ebx)
        jne reschedule
        cmpl $0,sigpending(%ebx)
        je   safesti                    # ensure need_resched updates are seen
 signal_return:
-       btrl $0,evtchn_upcall_mask(%esi) # reenable event callbacks
+       lock btrl $0,evtchn_upcall_mask(%esi) # reenable event callbacks
        movl %esp,%eax
        xorl %edx,%edx
        call SYMBOL_NAME(do_signal)
@@ -254,7 +254,7 @@ ret_from_exception:
 
        ALIGN
 reschedule:
-        btrl $0,evtchn_upcall_mask(%esi)       # reenable event callbacks
+        lock btrl $0,evtchn_upcall_mask(%esi)  # reenable event callbacks
        call SYMBOL_NAME(schedule)             # test
        jmp ret_from_sys_call
 
@@ -317,12 +317,12 @@ ENTRY(hypervisor_callback)
         movb CS(%esp),%cl
        test $2,%cl          # slow return to ring 2 or 3
        jne  ret_syscall_tests
-safesti:btrl $0,evtchn_upcall_mask(%esi) # reenable event callbacks
+safesti:lock btrl $0,evtchn_upcall_mask(%esi) # reenable event callbacks
 scrit:  /**** START OF CRITICAL REGION ****/
         testb $1,evtchn_upcall_pending(%esi)
         jnz  14f              # process more events if necessary...
         RESTORE_ALL
-14:     btsl $0,evtchn_upcall_mask(%esi)
+14:     lock btsl $0,evtchn_upcall_mask(%esi)
         jmp  11b
 ecrit:  /**** END OF CRITICAL REGION ****/
 # [How we do the fixup]. We want to merge the current stack frame with the
@@ -364,7 +364,7 @@ critical_fixup_table:
         .byte 0x20                            # pop  %es
         .byte 0x24,0x24,0x24                  # add  $4,%esp
         .byte 0x28                            # iret
-        .byte 0x00,0x00,0x00,0x00,0x00        # btsl $0,4(%esi)
+        .byte 0x00,0x00,0x00,0x00,0x00,0x00   # lock btsl $0,4(%esi)
         .byte 0x00,0x00                       # jmp  11b
 
 # Hypervisor uses this for application faults while it executes.
index d312bf0d4f712c5dbda2f0bfa7702480dbff8837..266867fc74bdadeb3d2bb173412f969272ea69d9 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/atomic.h>
 #include <asm/system.h>
 #include <asm/ptrace.h>
+#include <asm/synch_bitops.h>
 #include <asm/hypervisor.h>
 #include <asm/hypervisor-ifs/event_channel.h>
 
@@ -84,7 +85,7 @@ static void evtchn_handle_exceptions(shared_info_t *s, struct pt_regs *regs)
             {
                 printk(KERN_ALERT "Error on IRQ line %d!\n", 
                        dynirq + DYNIRQ_BASE);
-                clear_bit(port, &s->evtchn_exception[0]);
+                synch_clear_bit(port, &s->evtchn_exception[0]);
             }
             else
                 evtchn_device_upcall(port, 1);
@@ -99,7 +100,7 @@ void evtchn_do_upcall(struct pt_regs *regs)
 
     local_irq_save(flags);
     
-    while ( test_and_clear_bit(0, &s->evtchn_upcall_pending) )
+    while ( synch_test_and_clear_bit(0, &s->evtchn_upcall_pending) )
     {
         if ( s->evtchn_pending_sel != 0 )
             evtchn_handle_normal(s, regs);
index 2aea319dd52921534582124553be3c7dd2879656..fd52b97009f57d72b08b33d8b88e0b63c8162185 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/config.h>
 #include <asm/hypervisor.h>
 #include <asm/ptrace.h>
+#include <asm/synch_bitops.h>
 
 /*
  * LOW-LEVEL DEFINITIONS
@@ -27,21 +28,15 @@ void evtchn_device_upcall(int port, int exception);
 static inline void mask_evtchn(int port)
 {
     shared_info_t *s = HYPERVISOR_shared_info;
-    set_bit(port, &s->evtchn_mask[0]);
+    synch_set_bit(port, &s->evtchn_mask[0]);
 }
 
-/*
- * I haven't thought too much about the synchronisation in here against
- * other CPUs, but all the bit-update operations are reorder barriers on
- * x86 so reordering concerns aren't a problem for now. Some mb() calls
- * would be required on weaker architectures I think. -- KAF (24/3/2004)
- */
 static inline void unmask_evtchn(int port)
 {
     shared_info_t *s = HYPERVISOR_shared_info;
     int need_upcall = 0;
 
-    clear_bit(port, &s->evtchn_mask[0]);
+    synch_clear_bit(port, &s->evtchn_mask[0]);
 
     /*
      * The following is basically the equivalent of 'hw_resend_irq'. Just like
@@ -49,34 +44,43 @@ static inline void unmask_evtchn(int port)
      */
 
     /* Asserted a standard notification? */
-    if (  test_bit        (port,    &s->evtchn_pending[0]) && 
-         !test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
+    if (  synch_test_bit        (port,    &s->evtchn_pending[0]) && 
+         !synch_test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
         need_upcall = 1;
 
     /* Asserted an exceptional notification? */
-    if (  test_bit        (port,    &s->evtchn_exception[0]) && 
-         !test_and_set_bit(port>>5, &s->evtchn_exception_sel) )
+    if (  synch_test_bit        (port,    &s->evtchn_exception[0]) && 
+         !synch_test_and_set_bit(port>>5, &s->evtchn_exception_sel) )
         need_upcall = 1;
 
     /* If asserted either type of notification, check the master flags. */
     if ( need_upcall &&
-         !test_and_set_bit(0,       &s->evtchn_upcall_pending) &&
-         !test_bit        (0,       &s->evtchn_upcall_mask) )
+         !synch_test_and_set_bit(0,       &s->evtchn_upcall_pending) &&
+         !synch_test_bit        (0,       &s->evtchn_upcall_mask) )
         evtchn_do_upcall(NULL);
 }
 
 static inline void clear_evtchn(int port)
 {
     shared_info_t *s = HYPERVISOR_shared_info;
-    clear_bit(port, &s->evtchn_pending[0]);
+    synch_clear_bit(port, &s->evtchn_pending[0]);
 }
 
 static inline void clear_evtchn_exception(int port)
 {
     shared_info_t *s = HYPERVISOR_shared_info;
-    clear_bit(port, &s->evtchn_exception[0]);
+    synch_clear_bit(port, &s->evtchn_exception[0]);
 }
 
+static inline void evtchn_clear_error_virq(void)
+{
+    /*
+     * XXX This prevents a bogus 'VIRQ_ERROR' when interrupts are enabled
+     * for the first time. This works because by this point all important
+     * VIRQs (eg. timer) have been properly bound.
+     */
+    synch_clear_bit(0, &HYPERVISOR_shared_info->evtchn_pending[0]);
+}
 
 /*
  * CHARACTER-DEVICE DEFINITIONS
diff --git a/xenolinux-2.4.25-sparse/include/asm-xen/synch_bitops.h b/xenolinux-2.4.25-sparse/include/asm-xen/synch_bitops.h
new file mode 100644 (file)
index 0000000..8093de0
--- /dev/null
@@ -0,0 +1,83 @@
+#ifndef __XEN_SYNCH_BITOPS_H__
+#define __XEN_SYNCH_BITOPS_H__
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ * Heavily modified to provide guaranteed strong synchronisation
+ * when communicating with Xen or other guest OSes running on other CPUs.
+ */
+
+#include <linux/config.h>
+
+#define ADDR (*(volatile long *) addr)
+
+static __inline__ void synch_set_bit(int nr, volatile void * addr)
+{
+    __asm__ __volatile__ ( 
+        "lock btsl %1,%0"
+        : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ void synch_clear_bit(int nr, volatile void * addr)
+{
+    __asm__ __volatile__ (
+        "lock btrl %1,%0"
+        : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ void synch_change_bit(int nr, volatile void * addr)
+{
+    __asm__ __volatile__ (
+        "lock btcl %1,%0"
+        : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+    __asm__ __volatile__ (
+        "lock btsl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+    return oldbit;
+}
+
+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+    __asm__ __volatile__ (
+        "lock btrl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+    return oldbit;
+}
+
+static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+
+    __asm__ __volatile__ (
+        "lock btcl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+    return oldbit;
+}
+
+static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
+{
+    return ((1UL << (nr & 31)) & 
+            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+    __asm__ __volatile__ (
+        "btl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
+    return oldbit;
+}
+
+#define synch_test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ synch_const_test_bit((nr),(addr)) : \
+ synch_var_test_bit((nr),(addr)))
+
+#endif /* __XEN_SYNCH_BITOPS_H__ */
index 2c1194a7817587ed80a8bfdbd3503a1e52600f20..8237063f59d150f46fa8fdf12f4a9259b682b415 100644 (file)
@@ -4,9 +4,10 @@
 #include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/synch_bitops.h>
 #include <asm/segment.h>
 #include <asm/hypervisor.h>
-#include <linux/bitops.h> /* for LOCK_PREFIX */
 #include <asm/evtchn.h>
 
 #ifdef __KERNEL__
@@ -250,19 +251,19 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
        unsigned long prev;
        switch (size) {
        case 1:
-               __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+               __asm__ __volatile__("lock cmpxchgb %b1,%2"
                                     : "=a"(prev)
                                     : "q"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 2:
-               __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+               __asm__ __volatile__("lock cmpxchgw %w1,%2"
                                     : "=a"(prev)
                                     : "q"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 4:
-               __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
+               __asm__ __volatile__("lock cmpxchgl %1,%2"
                                     : "=a"(prev)
                                     : "q"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
@@ -320,49 +321,47 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
+#define safe_halt()             ((void)0)
+
 /*
- * NB. ALl the following routines are SMP-safe on x86, even where they look
- * possibly racy. For example, we must ensure that we clear the mask bit and
- * /then/ check teh pending bit. But this will happen because the bit-update
- * operations are ordering barriers.
- * 
- * For this reason also, many uses of 'barrier' here are rather anal. But
- * they do no harm.
+ * Note the use of synch_*_bit() operations in the following. These operations
+ * ensure correct serialisation of checks and updates w.r.t. Xen executing on
+ * a different CPU.
  */
 
 #define __cli()                                                               \
 do {                                                                          \
-    set_bit(0, &HYPERVISOR_shared_info->evtchn_upcall_mask);                  \
-    barrier();                                                                \
+    synch_set_bit(0, &HYPERVISOR_shared_info->evtchn_upcall_mask);            \
 } while (0)
 
 #define __sti()                                                               \
 do {                                                                          \
     shared_info_t *_shared = HYPERVISOR_shared_info;                          \
-    clear_bit(0, &_shared->evtchn_upcall_mask);                               \
-    barrier();                                                                \
-    if ( unlikely(test_bit(0, &_shared->evtchn_upcall_pending)) )             \
+    synch_clear_bit(0, &_shared->evtchn_upcall_mask);                         \
+    if ( unlikely(synch_test_bit(0, &_shared->evtchn_upcall_pending)) )       \
         evtchn_do_upcall(NULL);                                               \
 } while (0)
 
 #define __save_flags(x)                                                       \
 do {                                                                          \
-    (x) = test_bit(0, &HYPERVISOR_shared_info->evtchn_upcall_mask);           \
-    barrier();                                                                \
+    (x) = synch_test_bit(0, &HYPERVISOR_shared_info->evtchn_upcall_mask);     \
 } while (0)
 
-#define __restore_flags(x)      do { if (x) __cli(); else __sti(); } while (0)
-
-#define safe_halt()             ((void)0)
+#define __restore_flags(x) do { if (x) __cli(); else __sti(); } while (0)
 
-#define __save_and_cli(x)      do { __save_flags(x); __cli(); } while(0);
-#define __save_and_sti(x)      do { __save_flags(x); __sti(); } while(0);
+#define __save_and_cli(x)                                                     \
+do {                                                                          \
+    (x) = synch_test_and_set_bit(                                             \
+        0, &HYPERVISOR_shared_info->evtchn_upcall_mask);                      \
+} while (0)
 
-#define local_irq_save(x)                                                     \
+#define __save_and_sti(x)                                                     \
 do {                                                                          \
-    (x) = test_and_set_bit(0, &HYPERVISOR_shared_info->evtchn_upcall_mask);   \
-    barrier();                                                                \
+    (x) = synch_test_and_clear_bit(                                           \
+        0, &HYPERVISOR_shared_info->evtchn_upcall_mask);                      \
 } while (0)
+
+#define local_irq_save(x)       __save_and_cli(x)
 #define local_irq_restore(x)    __restore_flags(x)
 #define local_irq_disable()     __cli()
 #define local_irq_enable()      __sti()